
@InProceedings{MontagnerJrHiraCanu:2016:KeApWo,
               author = "Montagner, Igor S. and Jr., Roberto Hirata and Hirata, Nina S. T. 
                         and Canu, St{\'e}phane",
          affiliation = "{University of S{\~a}o Paulo} and {University of S{\~a}o Paulo} 
                         and {University of S{\~a}o Paulo} and LITIS, INSA de Rouen",
                title = "Kernel approximations for W-operator learning",
            booktitle = "Proceedings...",
                 year = "2016",
               editor = "Aliaga, Daniel G. and Davis, Larry S. and Farias, Ricardo C. and 
                         Fernandes, Leandro A. F. and Gibson, Stuart J. and Giraldi, Gilson 
                         A. and Gois, Jo{\~a}o Paulo and Maciel, Anderson and Menotti, 
                         David and Miranda, Paulo A. V. and Musse, Soraia and Namikawa, 
                         Laercio and Pamplona, Mauricio and Papa, Jo{\~a}o Paulo and 
                         Santos, Jefersson dos and Schwartz, William Robson and Thomaz, 
                         Carlos E.",
         organization = "Conference on Graphics, Patterns and Images, 29. (SIBGRAPI)",
            publisher = "IEEE Computer Society´s Conference Publishing Services",
              address = "Los Alamitos",
             keywords = "Kernel approximation, W-operator learning, Machine learning, Image 
                         Processing.",
             abstract = "Designing image operators is a hard task usually tackled by 
                         specialists in image processing. An alternative approach is to use 
                         machine learning to estimate local transformations that 
                         characterize the image operators from pairs of input-output 
                         images. The main challenge of this approach, called 
                         $W$-operator learning, is estimating operators over large 
                         windows without overfitting. Current techniques require the 
                         determination of a large number of parameters to maximize the 
                         performance of the trained operators. Support Vector Machines are 
                         known for their generalization performance and their ability to 
                         estimate nonlinear decision surfaces using kernels. However, 
                         training kernelized SVMs in the dual is not feasible when the 
                         training set is large. We estimate the local transformations by 
                         employing kernel approximations to train SVMs, avoiding the 
                         computation of the full Gram matrix. We also select appropriate 
                         kernels to process binary and gray-level inputs. Experiments show 
                         that operators trained using kernel approximations achieve 
                         results comparable to state-of-the-art methods on four public 
                         datasets.",
  conference-location = "S{\~a}o Jos{\'e} dos Campos, SP, Brazil",
      conference-year = "4-7 Oct. 2016",
                  doi = "10.1109/SIBGRAPI.2016.060",
                  url = "http://dx.doi.org/10.1109/SIBGRAPI.2016.060",
             language = "en",
                  ibi = "8JMKD3MGPAW/3M5J6KE",
                  url = "http://urlib.net/ibi/8JMKD3MGPAW/3M5J6KE",
           targetfile = "PID4373017.pdf",
        urlaccessdate = "2024, May 02"
}
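
As a rough illustration of the idea described in the abstract, the sketch below trains a linear SVM on an approximate kernel feature map computed over window patterns extracted from an input/output image pair, so the full Gram matrix of a dual kernel SVM is never formed. The window extraction, scikit-learn's Nystroem transformer, the RBF parameters, and the toy target operator are illustrative assumptions, not the authors' exact pipeline from the paper.

# Minimal sketch: W-operator learning via kernel approximation + linear SVM.
# The feature map (Nystroem) and classifier (LinearSVC) are assumptions for
# demonstration; the paper's actual method may differ in its choices.
import numpy as np
from sklearn.kernel_approximation import Nystroem
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline

def extract_windows(image, size=5):
    """Collect every size x size neighborhood of a padded 2D image,
    one row per pixel (the W-operator window patterns)."""
    r = size // 2
    padded = np.pad(image, r, mode="edge")
    h, w = image.shape
    return np.stack([
        padded[i:i + size, j:j + size].ravel()
        for i in range(h) for j in range(w)
    ])

# Toy input/output pair: learn a local transformation from example images.
rng = np.random.default_rng(0)
x_img = (rng.random((32, 32)) > 0.5).astype(float)
y_img = (np.roll(x_img, 1, axis=0) > 0).astype(int)  # hypothetical target operator

X = extract_windows(x_img)   # n_pixels x window_size**2 pattern matrix
y = y_img.ravel()            # desired output pixel for each pattern

# Nystroem maps patterns into an approximate RBF feature space; LinearSVC then
# trains in the primal, so no n x n Gram matrix is ever computed.
model = make_pipeline(
    Nystroem(kernel="rbf", gamma=0.1, n_components=100, random_state=0),
    LinearSVC(C=1.0),
)
model.fit(X, y)
y_pred = model.predict(X).reshape(x_img.shape)  # apply the learned operator

Training cost here grows with the number of approximate features rather than with the square of the number of training pixels, which is what makes large training sets tractable in this setting.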

